#=========================================================================================>
# Authors: Gilles Koumou & Emmanuel Selorm Tsyawo
# Project: Difference-in-differences with as few as two cross-sectional units
#-- A new perspective to the democracy-growth debate
# Date begun: Sept. 13, 2022
# Date last updated:   Oct. 2, 2025
# Place:        Tuscaloosa
# Purpose: Main file for executing all code
#=========================================================================================>
rm(list=ls()) # clear entire memory
# library(tseries)
# library(lmtest)
# library(sandwich)
# library(parallel)
# library(pbapply)
# library(nlme)
setwd(dirname(rstudioapi::getActiveDocumentContext()$path)) #set working directory path to source file
if (!dir.exists("output")) {
  dir.create("output", recursive = TRUE)
}
#=========================================================================================>
#source("File_0_Fonctions.R")
#=========================================================================================>
#Comments:
#=========================================================================================>
#===============================================================================
start.time<- Sys.time()
#===============================================================================
#=========================================================================================>
#Illustration: the parallel trends assumption
cat("Parallel Trends Illustration \n ")
source("File_1_PT_Illus.R") #Time difference of 50.64453 mins
#=========================================================================================>
#=========================================================================================>
#Simulations: mean bias, median absolute deviation, root-mean squared error, rejection rates
cat("Homogeneous treatment effects \n ")
source("File_3a_Sim.R") #Time difference of 50.64453 mins
#=========================================================================================>
#=========================================================================================>
#Simulations: mean bias, median absolute deviation, root-mean squared error, rejection rates
cat("Heterogeneous treatment effects \n ")
source("File_3b_Sim.R") #Time difference of 48.90229 mins
#=========================================================================================>
#=========================================================================================>
#Simulations: power curves for estimator based t-tests
cat("Power Curves - Homogeneous treatment effects \n ")
source("File_3c_Sim.R") #Time difference of 1.166494 hours
#=========================================================================================>
#=========================================================================================>
#Simulations: power curves for identification tests
cat("Idenfification testing \n ")
source("File_3d_Sim.R") #Time difference of 26.58319 mins
#=========================================================================================>
#=========================================================================================>
#Simulations: mean bias, median absolute deviation, root-mean squared error, rejection rates
cat("Homogeneous treatment effects; non-constant lambda \n ")
source("File_3e_Sim.R") #Time difference of 1.445081 hours
#=========================================================================================>
#=========================================================================================>
#Empirical Application
sink(file.path("output", "out_2a_Empirical.txt"))
source("File_2a_Empirical.R")
sink()
#--------------------------------->
sink(file.path("output", "out_2b_Empirical_More.txt"))
source("File_2b_Empirical_More.R")
#-------------------------------------------------------------------------------
influence_matrix_beta <- function(lmreg) {
  # Extract the design matrix X and residuals u
  X <- model.matrix(lmreg)
  u <- residuals(lmreg)
  # Calculate (X'X)^-1
  # Using the QR decomposition (chol2inv on qr.R) is more numerically stable than solve(t(X) %*% X)
  xtx_inv <- chol2inv(qr.R(lmreg$qr))
  # The influence terms are (u_i * x_i) %*% (X'X)^-1
  # We first scale the rows of X by the residuals
  # Then post-multiply by the inverse of the gram matrix
  M <- (X * u) %*% xtx_inv
  return(M)
}
lm_obj <- lm(mpg ~ wt + hp, data = mtcars)
M <- influence_matrix_beta(lm_obj)
M
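# Note: crossprod(M) = (X'X)^{-1} X' diag(u^2) X (X'X)^{-1}, i.e. the HC0 sandwich
# variance of the OLS coefficients; the comparison with sandwich::vcovHC below checks this.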
V_robust_manual <- crossprod(M)
V_robust_manual
all.equal(V_robust_manual, sandwich::vcovHC(lm_obj, type = "HC0"))
sandwich::vcovHC(lm_obj, type = "HC0")
V_robust_manual==sandwich::vcovHC(lm_obj, type = "HC0")
V_robust_manual-sandwich::vcovHC(lm_obj, type = "HC0")
setwd(dirname(rstudioapi::getActiveDocumentContext()$path)) #set working directory path to source file
#=========================================================================================>
# Authors: Gilles Koumou & Emmanuel Selorm Tsyawo
# Project: Difference-in-differences with as few as two cross-sectional units
#-- A new perspective to the democracy-growth debate
# Date begun: Sept. 13, 2022
# Date last updated:   Oct. 2, 2025
# Place:        Tuscaloosa
# Purpose: Empirical_Application; comparing to the Synthetic DiD, Classical Synthetic Controls
#           and Augmented Synthetic Controls
#=========================================================================================>
rm(list=ls()) # clear entire memory
library(tseries)
library(lmtest)
library(sandwich)
library(synthdid) # devtools::install_github("synth-inference/synthdid")
library(dplyr)
library(tidyr)
library(Synth)
library(SCtools)
library(augsynth) # devtools::install_github("ebenmichael/augsynth")
#setwd(dirname(rstudioapi::getActiveDocumentContext()$path)) #set working directory path to source file
#=========================================================================================>
source("File_0_Fonctions.R")
#=========================================================================================>
#load data:
dat<- read.table(file = "Daten_2015_USD_More.txt",sep = "\t" ,header = T)
head(dat)
dim(dat) #[1] 59 33
#remove countries with NA entries
id.cntry<- rep(TRUE,ncol(dat))
for (j in 3:ncol(dat)) {
  if(any(is.na(dat[,j]))){id.cntry[j]=FALSE}
}
colnames(dat) #identify countries in the Sample
id.cntry[30] <- F #and drop Togo as well, already available in the other sample
id.cntry[1:2] <- F
colnames(dat)[id.cntry] #19 uncontaminated controls + Togo
#extract data file used in the first part of the empirical application
daten<- read.table(file = "Daten_2015_USD.txt",header = T)
head(daten)
dim(daten)
names(daten)[1:2]<- c("Period","Year_Code")
daten<- cbind(daten,dat[,id.cntry]) #merge both samples for more controls
head(daten)
colnames(daten) #need neat column names
countries <- c("Togo","Benin","Sudan","Kenya","Nigeria","Tanzania","Zambia","Zimbabwe",
               "Burundi","Central African Republic","Malawi","Sierra Leone","Algeria",
               "Cameroon","Chad","Gabon","Libya","Madagascar","Rwanda","Somalia","Seychelles")
colnames(daten)[-c(1,2)]<- countries
names(daten)
dim(daten)
colnames(daten)
#=========================================================================================>
#=========================================================================================>
# Synthetic DiD - Estimate & SE & CI
## Years (columns of Y) and unit order: controls first, Benin last
yrs <- as.integer(sub("^YR", "", daten$Year_Code))
stopifnot(length(unique(yrs)) == nrow(daten))
all_units <- colnames(daten)[-(1:2)]           # drop Period, Year_Code
controls  <- setdiff(all_units, "Benin")
units     <- c(controls, "Benin")              # <-- Benin LAST
## Build Y: rows = units, cols = time
Y <- t(sapply(units, function(u) daten[[u]]))
colnames(Y) <- yrs
mode(Y) <- "numeric"
## Keep only pre (<1990) and post (>=1993); drop transition (1990–1992)
keep_cols <- (yrs < 1990) | (yrs >= 1993)
Y <- Y[, keep_cols, drop = FALSE]
yrs_keep <- as.integer(colnames(Y))
## SDID inputs: N0 = #controls, T0 = #pre columns
N0 <- length(controls)
T0 <- sum(yrs_keep < 1990)
stopifnot(N0 >= 1, T0 >= 1, sum(yrs_keep >= 1993) >= 1)
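# Note: synthdid_estimate assumes the first N0 rows of Y are control units (treated last)
# and the first T0 columns are pre-treatment periods, which is how Y was built above.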
## Estimate post-average ATT and get uncertainty
tau <- synthdid_estimate(Y, N0, T0)    # post-average ATT_{ω,T}
ATT_omega_T <- as.numeric(tau)
ATT_omega_T
summary(tau)
#confint(tau, level = 0.95)
plot(tau)
#obtain placebo-based standard errors
set.seed(0)
(se_pl <- try(synthdid::synthdid_se(tau, method = "placebo"), silent = TRUE))
# 1519.637
#extract pre-treatment weights
(lambda_lev <- attr(tau, "weights")$lambda)
pre_treatment_periods <- colnames(attr(tau, "setup")$Y)[seq_len(attr(tau, "setup")$T0)]
names(lambda_lev) <- pre_treatment_periods
print(lambda_lev)
#-------------------------------------------------------------------------------
Y_log <- log(Y); any(!is.finite(Y_log)) # sanity check: log requires strictly positive GDP values
tau_log <- synthdid_estimate(Y_log, N0, T0)    # post-average ATT_{ω,T}
ATT_omega_T_log <- as.numeric(tau_log)
ATT_omega_T_log
summary(tau_log)
plot(tau_log)
set.seed(0)
(se_pl_log <- try(synthdid::synthdid_se(tau_log, method = "placebo"), silent = TRUE))
# 0.2820599
#extract pre-treatment time weights (log outcome)
(lambda_log <- attr(tau_log, "weights")$lambda)
names(lambda_log) <- pre_treatment_periods
print(lambda_log)
#------------
# print out the time-weights for the level and log transformations
MM=cbind(pre_treatment_periods,lambda_lev,round(lambda_log,3))
colnames(MM)<- c("Year","Lambda_Level","Lambda_Log")
MM
#-------------------------------------------------------------------------------
cat("S-DiD results")
res.SDiD<- round(rbind(c(ATT_omega_T_log,ATT_omega_T),c(se_pl_log,se_pl)),3)
colnames(res.SDiD)<- c("att.log.GDP_pc","att.GDP_pc")
rownames(res.SDiD)<- c("Estimate","se")
print(res.SDiD)
#                 att.log.GDP_pc att.GDP_pc
# Estimate          0.249    101.582
# se                0.282   1519.637
#=========================================================================================>
#=========================================================================================>
# Classic SCM + placebo inference (Abadie et al. style)
## 1) Wide -> long, make plain data.frame with numeric ids
df_long <- daten %>%
  transmute(time = as.integer(sub("^YR", "", Year_Code)),
            across(-c(Period, Year_Code))) %>%
  pivot_longer(cols = -time, names_to = "unit", values_to = "gdp_pc") %>%
  mutate(unit = as.character(unit)) %>%
  arrange(unit, time)
# strictly numeric id 1..J (not factor), and plain data.frame
unit_levels <- sort(unique(df_long$unit))
df_sc <- df_long %>%
  mutate(unit_id = as.numeric(factor(unit, levels = unit_levels))) %>%
  select(unit_id, time, gdp_pc) %>%                 # keep only what Synth needs
  as.data.frame()
stopifnot(is.numeric(df_sc$unit_id), is.numeric(df_sc$time), is.numeric(df_sc$gdp_pc))
## 2) IDs for treated/control (use the numeric id!)
treated_name <- "Benin"
treated_id   <- match(treated_name, unit_levels)
controls_id  <- setdiff(seq_along(unit_levels), treated_id)
pre_years  <- sort(unique(df_long$time[df_long$time < 1990]))
post_years <- sort(unique(df_long$time[df_long$time >= 1993]))  # drop 1990–1992
#-------------------------------------------------------
## 3) dataprep (outcome-only SCM; include pre-mean as special predictor)
# outcome in levels
dp <- dataprep(
  foo = df_sc,
  predictors = NULL,
  special.predictors = list(list("gdp_pc", pre_years, "mean")),
  dependent     = "gdp_pc",
  unit.variable = "unit_id",      # must be present & numeric in foo
  time.variable = "time",
  treatment.identifier = treated_id,
  controls.identifier  = controls_id,
  time.predictors.prior = pre_years,
  time.optimize.ssr     = pre_years,
  time.plot             = c(pre_years, post_years)
)
#-------------------------------------------------------
# outcome in logs
df_sc_log <- df_sc
df_sc_log$gdp_pc <- log(df_sc$gdp_pc)
dp_log <- dataprep(
  foo = df_sc_log,
  predictors = NULL,
  special.predictors = list(list("gdp_pc", pre_years, "mean")),
  dependent     = "gdp_pc",
  unit.variable = "unit_id",      # must be present & numeric in foo
  time.variable = "time",
  treatment.identifier = treated_id,
  controls.identifier  = controls_id,
  time.predictors.prior = pre_years,
  time.optimize.ssr     = pre_years,
  time.plot             = c(pre_years, post_years)
)
## Helper: fit SCM for one treated id, return post-average gap and pre/post MSPE
scm_one <- function(df_sc, treated_id, controls_id, pre_years, post_years, Sigf.ipop = 2) {
  dp <- dataprep(
    foo = df_sc,
    predictors = NULL,
    special.predictors = list(list("gdp_pc", pre_years, "mean")),
    dependent     = "gdp_pc",
    unit.variable = "unit_id",
    time.variable = "time",
    treatment.identifier = treated_id,
    controls.identifier  = controls_id,
    time.predictors.prior = pre_years,
    time.optimize.ssr     = pre_years,
    time.plot             = c(pre_years, post_years)
  )
  sc <- synth(dp, Sigf.ipop = Sigf.ipop)
  treated_path   <- dp$Y1plot
  synthetic_path <- dp$Y0plot %*% sc$solution.w
  gap <- as.numeric(treated_path - synthetic_path)
  years <- dp$tag$time.plot
  pre_idx  <- years %in% pre_years
  post_idx <- years %in% post_years
  pre_mspe  <- mean(gap[pre_idx]^2)
  post_mspe <- mean(gap[post_idx]^2)
  post_avg  <- mean(gap[post_idx])
  list(post_avg = post_avg, pre_mspe = pre_mspe, post_mspe = post_mspe,
       gap = gap, years = years)
}
## 1) Treated (Benin): ATT_{ω,T} and treated pre-MSPE
res_treated <- scm_one(df_sc, treated_id, controls_id, pre_years, post_years)
ATT_omega_T_SCM <- res_treated$post_avg
mspe_treated    <- res_treated$pre_mspe
cat("ATT_omega_T_SCM =", round(ATT_omega_T_SCM, 3), " ; pre-MSPE =", round(mspe_treated, 3), "\n")
unit_levels[controls_id] #list of control units
## 2) Placebos: loop over each donor as pseudo-treated, using remaining donors as controls
placebo_stats <- lapply(controls_id, function(pid) {
  ctrls <- setdiff(controls_id, pid)           # donor pool excludes the pseudo-treated
  try(scm_one(df_sc, pid, ctrls, pre_years, post_years), silent = TRUE)
})
## Keep only successful fits
ok <- vapply(placebo_stats, function(x) !inherits(x, "try-error"), logical(1))
placebo_stats <- placebo_stats[ok]
## Extract placebo post averages and their pre-MSPE
pl_post_avg <- sapply(placebo_stats, `[[`, "post_avg")
pl_pre_mspe <- sapply(placebo_stats, `[[`, "pre_mspe")
## 3) (Optional but recommended) Quality filter by pre-fit
##    Keep placebos whose pre-MSPE is not much worse than treated (e.g., <= 5x)
mspe_ratio <- pl_pre_mspe / mspe_treated
keep <- mspe_ratio <= 5            # adjust factor if you want more/less stringent
pl_post_avg_keep <- pl_post_avg[keep]
cat("Placebos kept for inference:", sum(keep), "of", length(pl_post_avg), "\n")
## 4) Randomization p-value (two-sided) using the kept placebos
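##    (share of placebo post-treatment average gaps at least as large in absolute value as Benin's)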
pval <- mean(abs(pl_post_avg_keep) >= abs(ATT_omega_T_SCM))
cat("Randomization p-value (two-sided):", round(pval, 4), "\n")
#-------------------------------------------------------------------------------
## 1) Treated (Benin): ATT_{ω,T} and treated pre-MSPE
res_treated_log <- scm_one(df_sc_log, treated_id, controls_id, pre_years, post_years)
ATT_omega_T_SCM_log <- res_treated_log$post_avg
mspe_treated_log    <- res_treated_log$pre_mspe
cat("ATT_omega_T_SCM =", round(ATT_omega_T_SCM_log, 3), " ; pre-MSPE =", round(mspe_treated_log, 3), "\n")
## 2) Placebos: loop over each donor as pseudo-treated, using remaining donors as controls
placebo_stats <- lapply(controls_id, function(pid) {
  ctrls <- setdiff(controls_id, pid)           # donor pool excludes the pseudo-treated
  try(scm_one(df_sc_log, pid, ctrls, pre_years, post_years), silent = TRUE)
})
## Keep only successful fits
ok <- vapply(placebo_stats, function(x) !inherits(x, "try-error"), logical(1))
placebo_stats <- placebo_stats[ok]
## Extract placebo post averages and their pre-MSPE
pl_post_avg <- sapply(placebo_stats, `[[`, "post_avg")
pl_pre_mspe <- sapply(placebo_stats, `[[`, "pre_mspe")
## 3) (Optional but recommended) Quality filter by pre-fit
##    Keep placebos whose pre-MSPE is not much worse than treated (e.g., <= 5x)
mspe_ratio <- pl_pre_mspe / mspe_treated_log
keep <- mspe_ratio <= 5            # adjust factor if you want more/less stringent
pl_post_avg_keep <- pl_post_avg[keep]
cat("Placebos kept for inference:", sum(keep), "of", length(pl_post_avg), "\n")
## 4) Randomization p-value (two-sided) using the kept placebos
pval.log <- mean(abs(pl_post_avg_keep) >= abs(ATT_omega_T_SCM_log))
cat("Randomization p-value (two-sided):", round(pval.log, 4), "\n")
#-------------------------------------------------------------------------------
cat("SC results")
res.SC<- round(rbind(c(ATT_omega_T_SCM_log,ATT_omega_T_SCM),c(pval.log,pval)),3)
colnames(res.SC)<- c("att.log.GDP_pc","att.GDP_pc")
rownames(res.SC)<- c("Estimate","p_value")
print(res.SC)
#               att.log.GDP_pc att.GDP_pc
# Estimate           0.09    -43.968
# p_value            0.70      0.800
#==========================================================================================>
#==========================================================================================>
# Augmented Synthetic Control — estimate and CI
dat_as <- df_long %>%
  filter(time %in% c(pre_years, post_years)) %>%
  mutate(treat = as.integer(unit == "Benin" & time >= 1993))
#-------------------------------------------------------------------------------
# Fit (outcome-only)
as_out <- augsynth(gdp_pc ~ treat, unit = unit, time = time, data = dat_as,
                   scm = TRUE, progfunc = "None")
print(summary(as_out))   # sometimes reports avg ATT & SE
as_out <- augsynth(gdp_pc ~ treat, unit = unit, time = time, data = dat_as,
                   scm = TRUE, progfunc = "Ridge")
# testing the average post-treatment effect with reported p-value
set.seed(0)
(res.as_out<- summary(as_out,stat_func = function(x)abs(sum(x))))
res.as_out$average_att
ATT_avg_ASCM<- res.as_out$average_att$Estimate
(w_ascm_lev <- as_out$weights)    # unit weights on controls
round(w_ascm_lev,3)
#-------------------------------------------------------------------------------
## ---- Fit ASCM on log outcome -----------------------------------------------
stopifnot(all(dat_as$gdp_pc > 0))
dat_log <- mutate(dat_as, gdp_pc_log = log(gdp_pc))
as_log <- augsynth(gdp_pc_log ~ treat, unit = unit, time = time,
                   data = dat_log, scm = TRUE, progfunc = "Ridge")
set.seed(0)
(res.as_log<- summary(as_log,stat_func = function(x)abs(sum(x))))
res.as_log$average_att
(w_ascm_log <- as_log$weights)
round(w_ascm_log,3)
#-------------------------------------------------------------------------------
cat("ASC results")
res.ASC<- round(rbind(c(res.as_log$average_att$Estimate,
                        res.as_out$average_att$Estimate),
                      c(res.as_log$average_att$p_val,
                        res.as_out$average_att$p_val)),3)
colnames(res.ASC)<- c("att.log.GDP_pc","att.GDP_pc")
rownames(res.ASC)<- c("Estimate","se")
print(res.ASC)
#             att.log.GDP_pc att.GDP_pc
# Estimate          0.291    221.452
# se                0.100    107.072
#================================================================================>
# report benchmark 2015 results: level ATT estimates as a percentage of Benin's 2015 GDP per capita
res.ATT.level = c(ATT_omega_T,ATT_omega_T_SCM,ATT_avg_ASCM)
bench.2015=100*round(res.ATT.level/c(daten$Benin[which(daten$Period==2015)]),3)
print.fn(x=bench.2015,nsmall=1)
# 9.8 &-4.2 &23.9 \\
#================================================================================>
#================================================================================>
# Efficient T-DiD with Cameroon as an additional control
#-------------------------------------------------------------------------------
# Two-candidate control test (Togo & Cameroon) -- Level GDP per capita
(T_DiD_Togo_Cameroon_lev=ATT.fun(Y1=daten$Togo,Y0=daten$Cameroon,var.time = daten$Period,
                                 time.treat = 1990:1992,t.X="Lag",se.type="NW",weight=T))
# the identification test "passes": it fails to reject a zero ATT between the two candidate controls
(T_DiD_Togo_lev=ATT.fun(Y1=daten$Benin,Y0=daten$Togo,var.time = daten$Period,
                        time.treat = 1990:1992,t.X="Lag",se.type="NW",weight=T))
(T_DiD_Cameroon_lev=ATT.fun(Y1=daten$Benin,Y0=daten$Cameroon,var.time = daten$Period,
                            time.treat = 1990:1992,t.X="Lag",se.type="NW",weight=T))
# Construct optimal combination
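# Minimum-variance combination: with S the cross-product (estimated covariance) of the two
# estimates' influence terms and 1 a vector of ones, the optimal weights are
# h* = S^{-1} 1 (1' S^{-1} 1)^{-1}, which sum to one; the combined estimate is
# h*' (ATT_Togo, ATT_Cameroon) with variance (1' S^{-1} 1)^{-1}, as computed below.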
IMB_Togo_lev<- influence_matrix_beta(T_DiD_Togo_lev$reg.Obj)
IMB_Cameroon_lev<- influence_matrix_beta(T_DiD_Cameroon_lev$reg.Obj)
S_omega_lev.n<- crossprod(cbind(IMB_Togo_lev[,2],IMB_Cameroon_lev[,2]))
Vec_2<- rep(1,2)
(h_2_lev_star<- solve(S_omega_lev.n)%*%Vec_2%*%(t(Vec_2)%*%solve(S_omega_lev.n)%*%Vec_2)^(-1))
sum(h_2_lev_star)#check
(est_lev_star<- t(h_2_lev_star)%*%c(T_DiD_Togo_lev$est.ATT,T_DiD_Cameroon_lev$est.ATT))
(se_lev_star<- sqrt( (t(Vec_2)%*%solve(S_omega_lev.n)%*%Vec_2)^(-1) ) )
(pval.lev_star = 2*pnorm(-abs(est_lev_star/se_lev_star)))
#-------------------------------------------------------------------------------
# Two-candidate control test (Togo & Cameroon) -- Log GDP per capita
(T_DiD_Togo_Cameroon_log=ATT.fun(Y1=daten$Togo,Y0=daten$Cameroon,var.time = daten$Period,
                                 time.treat = 1990:1992,t.X="Lag",se.type="NW",transf=log,weight=T))
# the identification test "passes": it fails to reject a zero ATT between the two candidate controls
(T_DiD_Togo_log=ATT.fun(Y1=daten$Benin,Y0=daten$Togo,var.time = daten$Period,
                        time.treat = 1990:1992,t.X="Lag",se.type="NW",transf=log,weight=T))
(T_DiD_Cameroon_log=ATT.fun(Y1=daten$Benin,Y0=daten$Cameroon,var.time = daten$Period,
                            time.treat = 1990:1992,t.X="Lag",se.type="NW",transf=log,weight=T))
# Construct optimal combination
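# Same minimum-variance weighting as in the level case, applied to the log-outcome estimates.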
IMB_Togo_log<- influence_matrix_beta(T_DiD_Togo_log$reg.Obj)
IMB_Cameroon_log<- influence_matrix_beta(T_DiD_Cameroon_log$reg.Obj)
S_omega_log.n<- crossprod(cbind(IMB_Togo_log[,2],IMB_Cameroon_log[,2]))
Vec_2<- rep(1,2)
(h_2_log_star<- solve(S_omega_log.n)%*%Vec_2%*%(t(Vec_2)%*%solve(S_omega_log.n)%*%Vec_2)^(-1))
sum(h_2_log_star)#check
(est_log_star<- t(h_2_log_star)%*%c(T_DiD_Togo_log$est.ATT,T_DiD_Cameroon_log$est.ATT))
(se_log_star<- sqrt( (t(Vec_2)%*%solve(S_omega_log.n)%*%Vec_2)^(-1) ) )
(pval.log_star = 2*pnorm(-abs(est_log_star/se_log_star)))
#================================================================================>
#report results
res_T_DiD<- round(cbind(c(est_log_star,se_log_star,pval.log_star,T_DiD_Togo_Cameroon_log$pval.ATT),
                        c(est_lev_star,se_lev_star,pval.lev_star,T_DiD_Togo_Cameroon_lev$pval.ATT)),3)
colnames(res_T_DiD)<- c("log","level")
rownames(res_T_DiD)<- c("est", "se","pval.est","pval.identif")
res_T_DiD
#=======================================================================================>
#=======================================================================================>